#include <asm/vcpumask.h>
#include <asm/vmmu.h>
-DEFINE_PER_CPU (unsigned long, vhpt_paddr);
-DEFINE_PER_CPU (unsigned long, vhpt_pend);
+DEFINE_PER_CPU_READ_MOSTLY(unsigned long, vhpt_paddr);
+DEFINE_PER_CPU_READ_MOSTLY(unsigned long, vhpt_pend);
#ifdef CONFIG_XEN_IA64_TLBFLUSH_CLOCK
DEFINE_PER_CPU(volatile u32, vhpt_tlbflush_timestamp);
#endif
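For reference, the macro chain added later in this patch expands the new ia64 definition as follows (illustration only, not part of the patch):

/*
 * DEFINE_PER_CPU_READ_MOSTLY(unsigned long, vhpt_paddr)
 *   => __DEFINE_PER_CPU(unsigned long, _vhpt_paddr, .read_mostly)
 *   => __attribute__((__section__(".data.percpu.read_mostly")))
 *      __SMALL_ADDR_AREA unsigned long per_cpu__vhpt_paddr;
 *
 * The variable keeps its per_cpu__ symbol name; only its section changes,
 * which is why no per_cpu()/this_cpu() users need to be touched.
 */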
{
__per_cpu_start = .;
*(.data.percpu)
+ . = ALIGN(SMP_CACHE_BYTES);
+ *(.data.percpu.read_mostly)
__per_cpu_end = .;
}
. = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits into percpu page size */
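The resulting per-CPU image layout (a sketch, per the linker fragments in this patch):

/*
 * __per_cpu_start:
 *   *(.data.percpu)              <- per-CPU data that is written often
 *   . = ALIGN(SMP_CACHE_BYTES);  <- pad to a cache-line boundary so that
 *   *(.data.percpu.read_mostly)  <- read-mostly per-CPU data never shares
 *                                   a cache line with the written data
 * __per_cpu_end (__per_cpu_data_end on x86)
 */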
u32 vmx_vmentry_control __read_mostly;
bool_t cpu_has_vmx_ins_outs_instr_info __read_mostly;
-static DEFINE_PER_CPU(struct vmcs_struct *, host_vmcs);
+static DEFINE_PER_CPU_READ_MOSTLY(struct vmcs_struct *, host_vmcs);
static DEFINE_PER_CPU(struct vmcs_struct *, current_vmcs);
static DEFINE_PER_CPU(struct list_head, active_vmcs_list);
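The conversions in this patch all follow the same pattern: the per-CPU pointer is written once at CPU bring-up and only read afterwards. A minimal sketch of that access pattern (helper names are illustrative, not the actual Xen code):

/*
 *     this_cpu(host_vmcs) = alloc_host_vmcs_page();    // one write at boot
 *     ...
 *     enable_vmx(virt_to_maddr(this_cpu(host_vmcs))); // thereafter, reads only
 */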
extern void vesa_mtrr_init(void);
extern void init_tmem(void);
-DEFINE_PER_CPU(struct desc_struct *, gdt_table) = boot_cpu_gdt_table;
+DEFINE_PER_CPU_READ_MOSTLY(struct desc_struct *, gdt_table) = boot_cpu_gdt_table;
#ifdef CONFIG_COMPAT
-DEFINE_PER_CPU(struct desc_struct *, compat_gdt_table)
+DEFINE_PER_CPU_READ_MOSTLY(struct desc_struct *, compat_gdt_table)
= boot_cpu_compat_gdt_table;
#endif
#endif
string_param("nmi", opt_nmi);
-DEFINE_PER_CPU(u32, ler_msr);
+DEFINE_PER_CPU_READ_MOSTLY(u32, ler_msr);
/* Master table, used by CPU0. */
idt_entry_t idt_table[IDT_ENTRIES];
unmap_domain_page(l1t);
}
-DEFINE_PER_CPU(struct tss_struct *, doublefault_tss);
+DEFINE_PER_CPU_READ_MOSTLY(struct tss_struct *, doublefault_tss);
static unsigned char __attribute__ ((__section__ (".bss.page_aligned")))
boot_cpu_doublefault_space[PAGE_SIZE];
*/
#include <xen/config.h>
+#include <xen/cache.h>
#include <asm/page.h>
#include <asm/percpu.h>
#undef ENTRY
__init_end = .;
__per_cpu_shift = PERCPU_SHIFT; /* kdump assist */
- __per_cpu_start = .;
- .data.percpu : { *(.data.percpu) } :text
- __per_cpu_data_end = .;
+ .data.percpu : {
+ __per_cpu_start = .;
+ *(.data.percpu)
+ . = ALIGN(SMP_CACHE_BYTES);
+ *(.data.percpu.read_mostly)
+ __per_cpu_data_end = .;
+ } :text
. = __per_cpu_start + (NR_CPUS << PERCPU_SHIFT);
. = ALIGN(PAGE_SIZE);
__per_cpu_end = .;
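The reservation arithmetic is unchanged: each CPU owns one PERCPU_SIZE slot. A worked example, assuming PERCPU_SHIFT == 13 (value chosen for illustration only):

/*
 *   PERCPU_SIZE            = 1UL << 13  = 8KiB per CPU
 *   total reservation      = NR_CPUS << PERCPU_SHIFT  = NR_CPUS * 8KiB
 *   cpu N's copy of 'var'  = &per_cpu__var + (N << PERCPU_SHIFT)
 *
 * which is the offset computation behind the per_cpu(var, cpu) accessor.
 */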
unsigned int m2p_compat_vstart = __HYPERVISOR_COMPAT_VIRT_START;
#endif
-DEFINE_PER_CPU(void *, compat_arg_xlat);
+DEFINE_PER_CPU_READ_MOSTLY(void *, compat_arg_xlat);
/* Top-level master (and idle-domain) page directory. */
l4_pgentry_t __attribute__ ((__section__ (".bss.page_aligned")))
/* Modified for x86-64 Xen by Keir Fraser */
#include <xen/config.h>
+#include <xen/cache.h>
#include <asm/page.h>
#include <asm/percpu.h>
#undef ENTRY
__init_end = .;
__per_cpu_shift = PERCPU_SHIFT; /* kdump assist */
- __per_cpu_start = .;
- .data.percpu : { *(.data.percpu) } :text
- __per_cpu_data_end = .;
+ .data.percpu : {
+ __per_cpu_start = .;
+ *(.data.percpu)
+ . = ALIGN(SMP_CACHE_BYTES);
+ *(.data.percpu.read_mostly)
+ __per_cpu_data_end = .;
+ } :text
. = __per_cpu_start + (NR_CPUS << PERCPU_SHIFT);
. = ALIGN(PAGE_SIZE);
__per_cpu_end = .;
#include <compat/kexec.h>
#endif
-static DEFINE_PER_CPU(void *, crash_notes);
+static DEFINE_PER_CPU_READ_MOSTLY(void *, crash_notes);
static Elf_Note *xen_crash_note;
* allocated iff opt_tmem_compress */
#define LZO_WORKMEM_BYTES LZO1X_1_MEM_COMPRESS
#define LZO_DSTMEM_PAGES 2
-static DEFINE_PER_CPU(unsigned char *, workmem);
-static DEFINE_PER_CPU(unsigned char *, dstmem);
+static DEFINE_PER_CPU_READ_MOSTLY(unsigned char *, workmem);
+static DEFINE_PER_CPU_READ_MOSTLY(unsigned char *, dstmem);
#ifdef COMPARE_COPY_PAGE_SSE2
#include <asm/flushtlb.h> /* REMOVE ME AFTER TEST */
integer_param("tbuf_size", opt_tbuf_size);
/* Pointers to the meta-data objects for all system trace buffers */
-static DEFINE_PER_CPU(struct t_buf *, t_bufs);
-static DEFINE_PER_CPU(unsigned char *, t_data);
+static DEFINE_PER_CPU_READ_MOSTLY(struct t_buf *, t_bufs);
+static DEFINE_PER_CPU_READ_MOSTLY(unsigned char *, t_data);
static int data_size;
/* High water mark for trace buffers; */
extern __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name
/* Separate out the type, so (int[3], foo) works. */
-#define DEFINE_PER_CPU(type, name) \
- __attribute__((__section__(".data.percpu"))) \
- __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name
+#define __DEFINE_PER_CPU(type, name, suffix) \
+ __attribute__((__section__(".data.percpu" #suffix))) \
+ __SMALL_ADDR_AREA __typeof__(type) per_cpu_##name
/*
* Pretty much a literal copy of asm-generic/percpu.h, except that percpu_modcopy() is an
#define PERCPU_SIZE (1UL << PERCPU_SHIFT)
/* Separate out the type, so (int[3], foo) works. */
-#define DEFINE_PER_CPU(type, name) \
- __attribute__((__section__(".data.percpu"))) \
- __typeof__(type) per_cpu__##name
+#define __DEFINE_PER_CPU(type, name, suffix) \
+ __attribute__((__section__(".data.percpu" #suffix))) \
+ __typeof__(type) per_cpu_##name
/* var is in discarded region: offset to particular copy we want */
#define per_cpu(var, cpu) \
#include <xen/config.h>
#include <asm/percpu.h>
+/*
+ * Separate out the type, so (int[3], foo) works.
+ *
+ * The _##name concatenation prevents 'name' from being macro expanded,
+ * while still allowing a per-architecture symbol name prefix.
+ */
+#define DEFINE_PER_CPU(type, name) __DEFINE_PER_CPU(type, _##name, )
+#define DEFINE_PER_CPU_READ_MOSTLY(type, name) \
+ __DEFINE_PER_CPU(type, _##name, .read_mostly)
+
/* Preferred on Xen. Also see arch-defined per_cpu(). */
#define this_cpu(var) __get_cpu_var(var)
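To see why the _##name indirection in DEFINE_PER_CPU matters, consider a name that happens to be an object-like macro (hypothetical example):

/*
 *     #define stats runtime_stats      // imagine a header defined this
 *     DEFINE_PER_CPU(int, stats);
 *
 * The _##name paste inside DEFINE_PER_CPU happens before argument
 * expansion, so __DEFINE_PER_CPU() receives the token _stats and emits
 * per_cpu__stats -- matching what per_cpu(stats, cpu) pastes together.
 * Passing 'name' through unpasted would expand it first, deriving the
 * emitted symbol from runtime_stats rather than stats and silently
 * breaking every user of the variable.
 */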